error_code &= 3;
error_code |= (regs->xcs & 2) << 1;
- if ( flush_page_update_queue() != 0 )
- return;
+ /* ensure all updates have completed */
+ flush_page_update_queue();
/*
* We fault-in kernel-space virtual memory on-demand. The
#include <asm-xen/hypervisor.h>
#include <asm-xen/multicall.h>
#include <asm-xen/balloon.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
#include <linux/percpu.h>
+#endif
/*
* This suffices to protect us if we ever move to SMP domains.
#include <asm-xen/xen-public/physdev.h>
#include <asm-xen/ctrl_if.h>
#include <asm-xen/hypervisor.h>
-#define XEN_EVTCHN_MASK_OPS
#include <asm-xen/evtchn.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]);
/* evtchn <-> IPI mapping. */
+#ifndef NR_IPIS // XXX SMH: temp fix for 2.4
+#define NR_IPIS 1
+#endif
DEFINE_PER_CPU(int, ipi_to_evtchn[NR_IPIS]);
/* Reference counts for bindings to IRQs. */
/* Entry point for notifications into the userland character device. */
void evtchn_device_upcall(int port);
-#ifdef XEN_EVTCHN_MASK_OPS
-
static inline void mask_evtchn(int port)
{
shared_info_t *s = HYPERVISOR_shared_info;
}
}
-#endif /* XEN_EVTCHN_MASK_OPS */
-
static inline void clear_evtchn(int port)
{
shared_info_t *s = HYPERVISOR_shared_info;
void xen_machphys_update(unsigned long mfn, unsigned long pfn);
void _flush_page_update_queue(void);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+/*
+** XXX SMH: 2.4 doesn't have percpu.h (or support SMP guests) so just
+** include sufficient #defines to allow the below to build.
+*/
+#define DEFINE_PER_CPU(type, name) \
+    __typeof__(type) per_cpu__##name
+
+/* Evaluate 'cpu' for side effects, then use the single UP copy.  Note
+** the argument is parenthesised so expression arguments expand safely. */
+#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu__##var))
+#define __get_cpu_var(var) per_cpu__##var
+#define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu__##name
+
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+#endif /* linux < 2.6.0 */
+
#define flush_page_update_queue() do { \
DECLARE_PER_CPU(unsigned int, mmu_update_queue_idx); \
if (per_cpu(mmu_update_queue_idx, smp_processor_id())) \